* lost. The domain will get a spurious event, but it can cope.
*/
vcpu_info(v, evtchn_upcall_pending) = 1;
- for ( i = 0; i < BITS_PER_GUEST_LONG(d); i++ )
+ for ( i = 0; i < BITS_PER_EVTCHN_WORD(d); i++ )
set_bit(i, &vcpu_info(v, evtchn_pending_sel));
return 0;
break;
}
- domctl->u.address_size.size = BITS_PER_GUEST_LONG(d);
+ domctl->u.address_size.size =
+ is_pv_32on64_domain(d) ? 32 : BITS_PER_LONG;
ret = 0;
rcu_unlock_domain(d);
(test_bit(d->pirq_to_evtchn[irq],
&shared_info(d, evtchn_pending)) ?
'P' : '-'),
- (test_bit(d->pirq_to_evtchn[irq]/BITS_PER_GUEST_LONG(d),
+ (test_bit(d->pirq_to_evtchn[irq] /
+ BITS_PER_EVTCHN_WORD(d),
&vcpu_info(d->vcpu[0], evtchn_pending_sel)) ?
'S' : '-'),
(test_bit(d->pirq_to_evtchn[irq],
t->vector = TRAP_nmi;
t->flags = 0;
- t->cs = !IS_COMPAT(d) ? FLAT_KERNEL_CS : FLAT_COMPAT_KERNEL_CS;
+ t->cs = (is_pv_32on64_domain(d) ?
+ FLAT_COMPAT_KERNEL_CS : FLAT_KERNEL_CS);
t->address = address;
TI_SET_IF(t, 1);
if ( (c.nat = xmalloc(struct vcpu_guest_context)) == NULL )
goto svc_out;
- if ( !IS_COMPAT(v->domain) )
- ret = copy_from_guest(c.nat, op->u.vcpucontext.ctxt, 1);
#ifdef CONFIG_COMPAT
+ if ( !is_pv_32on64_vcpu(v) )
+ ret = copy_from_guest(c.nat, op->u.vcpucontext.ctxt, 1);
else
ret = copy_from_guest(c.cmp,
guest_handle_cast(op->u.vcpucontext.ctxt,
void), 1);
+#else
+ ret = copy_from_guest(c.nat, op->u.vcpucontext.ctxt, 1);
#endif
ret = ret ? -EFAULT : 0;
if ( v != current )
vcpu_unpause(v);
- if ( !IS_COMPAT(v->domain) )
- ret = copy_to_guest(op->u.vcpucontext.ctxt, c.nat, 1);
#ifdef CONFIG_COMPAT
+ if ( !is_pv_32on64_vcpu(v) )
+ ret = copy_to_guest(op->u.vcpucontext.ctxt, c.nat, 1);
else
ret = copy_to_guest(guest_handle_cast(op->u.vcpucontext.ctxt,
void), c.cmp, 1);
+#else
+ ret = copy_to_guest(op->u.vcpucontext.ctxt, c.nat, 1);
#endif
if ( copy_to_guest(u_domctl, op, 1) || ret )
return 1;
if ( !test_bit (port, &shared_info(d, evtchn_mask)) &&
- !test_and_set_bit(port / BITS_PER_GUEST_LONG(d),
+ !test_and_set_bit(port / BITS_PER_EVTCHN_WORD(d),
&vcpu_info(v, evtchn_pending_sel)) )
{
vcpu_mark_events_pending(v);
*/
if ( test_and_clear_bit(port, &shared_info(d, evtchn_mask)) &&
test_bit (port, &shared_info(d, evtchn_pending)) &&
- !test_and_set_bit (port / BITS_PER_GUEST_LONG(d),
+ !test_and_set_bit (port / BITS_PER_EVTCHN_WORD(d),
&vcpu_info(v, evtchn_pending_sel)) )
{
vcpu_mark_events_pending(v);
test_bit(v->virq_to_evtchn[VIRQ_DEBUG],
&shared_info(d, evtchn_mask)),
test_bit(v->virq_to_evtchn[VIRQ_DEBUG] /
- BITS_PER_GUEST_LONG(d),
+ BITS_PER_EVTCHN_WORD(d),
&vcpu_info(v, evtchn_pending_sel)));
send_guest_vcpu_virq(v, VIRQ_DEBUG);
}
#define xen_t_buf t_buf
CHECK_t_buf;
#undef xen_t_buf
-#define TB_COMPAT IS_COMPAT(dom0)
#else
#define compat_t_rec t_rec
-#define TB_COMPAT 0
#endif
/* opt_tbuf_size: trace buffer size (in pages) */
bufsize = sizeof(struct xenoprof_buf);
i = sizeof(struct event_log);
#ifdef CONFIG_COMPAT
- d->xenoprof->is_compat = IS_COMPAT(is_passive ? dom0 : d);
+ d->xenoprof->is_compat = is_pv_32on64_domain(is_passive ? dom0 : d);
if ( XENOPROF_COMPAT(d->xenoprof) )
{
bufsize = sizeof(struct compat_oprof_buf);
#define is_pv_32on64_domain(d) (0)
#endif
#define is_pv_32on64_vcpu(v) (is_pv_32on64_domain((v)->domain))
-#define IS_COMPAT(d) (is_pv_32on64_domain(d))
struct trap_bounce {
uint32_t error_code;
int switch_compat(struct domain *);
int switch_native(struct domain *);
-#define BITS_PER_GUEST_LONG(d) \
- (!IS_COMPAT(d) ? BITS_PER_LONG : COMPAT_BITS_PER_LONG)
-
#else
#define compat_handle_is_null(hnd) 0
-#define BITS_PER_GUEST_LONG(d) BITS_PER_LONG
-
#endif
#endif /* __XEN_COMPAT_H__ */
extern struct domain *dom0;
#ifndef CONFIG_COMPAT
-#define MAX_EVTCHNS(d) NR_EVENT_CHANNELS
+#define BITS_PER_EVTCHN_WORD(d) BITS_PER_LONG
#else
-#define MAX_EVTCHNS(d) (!IS_COMPAT(d) ? \
-                        NR_EVENT_CHANNELS : \
-                        sizeof(unsigned int) * sizeof(unsigned int) * 64)
+#define BITS_PER_EVTCHN_WORD(d) (has_32bit_shinfo(d) ? 32 : BITS_PER_LONG)
#endif
+/*
+ * Two-level event-channel pending/mask bitmaps: one selector word of
+ * BITS_PER_EVTCHN_WORD bits, each bit covering one word of the same width,
+ * hence word * word channels total.  This must reproduce the values the
+ * removed definitions yielded: 64 * 64 = 4096 (== NR_EVENT_CHANNELS) for
+ * native 64-bit guests, and 32 * 32 = 1024 (== the old
+ * sizeof(unsigned int) * sizeof(unsigned int) * 64) for 32-on-64 guests.
+ * Note: an extra "* 64" factor here would oversize every per-domain
+ * event-channel limit by 64x and overrun the shared_info bitmaps.
+ */
+#define MAX_EVTCHNS(d) (BITS_PER_EVTCHN_WORD(d) * BITS_PER_EVTCHN_WORD(d))
#define EVTCHNS_PER_BUCKET 128
#define NR_EVTCHN_BUCKETS (NR_EVENT_CHANNELS / EVTCHNS_PER_BUCKET)
#define IS_PRIV(_d) ((_d)->is_privileged)
#define IS_PRIV_FOR(_d, _t) (IS_PRIV(_d) || ((_d)->target && (_d)->target == (_t)))
-#ifndef IS_COMPAT
-#define IS_COMPAT(d) 0
-#endif
-
#define VM_ASSIST(_d,_t) (test_bit((_t), &(_d)->vm_assist))
#define is_hvm_domain(d) ((d)->is_hvm)